int i, j, max_cpu_id;
uint64_t free_heap;
PyObject *ret_obj, *node_to_cpu_obj, *node_to_memory_obj;
+ PyObject *node_to_dma32_mem_obj;
xc_cpu_to_node_t map[MAX_CPU_ID + 1];
const char *virtcap_names[] = { "hvm", "hvm_directio" };
Py_DECREF(pyint);
}
+ xc_dom_loginit();
+ /* Free memory below 4GB (DMA32 zone), per node. */
+ node_to_dma32_mem_obj = PyList_New(0);
+
+ for ( i = 0; i < info.nr_nodes; i++ )
+ {
+ PyObject *pyint;
+
+ if ( xc_availheap(self->xc_handle, 0, 32, i, &free_heap) != 0 )
+ free_heap = 0;
+ xc_dom_printf("Node:%d: DMA32:%llu\n", i, (unsigned long long)free_heap);
+ pyint = PyInt_FromLong(free_heap / 1024);
+ PyList_Append(node_to_dma32_mem_obj, pyint);
+ Py_DECREF(pyint);
+ }
+
PyDict_SetItemString(ret_obj, "node_to_cpu", node_to_cpu_obj);
Py_DECREF(node_to_cpu_obj);
PyDict_SetItemString(ret_obj, "node_to_memory", node_to_memory_obj);
Py_DECREF(node_to_memory_obj);
+ PyDict_SetItemString(ret_obj, "node_to_dma32_mem", node_to_dma32_mem_obj);
+ Py_DECREF(node_to_dma32_mem_obj);
return ret_obj;
#undef MAX_CPU_ID
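
For reference, a minimal sketch of reading the new key from Python (hypothetical
values; it assumes the usual xen.lowlevel.xc bindings that xend already uses, and
that entries are KiB, per the free_heap / 1024 above):

    import xen.lowlevel.xc

    xc = xen.lowlevel.xc.xc()
    physinfo = xc.physinfo()
    # 'node_to_dma32_mem' is the per-node list built by the hunk above.
    for node in range(0, physinfo['nr_nodes']):
        print 'node%d: %dKiB free below 4GB' % \
            (node, physinfo['node_to_dma32_mem'][node])
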
def _setCPUAffinity(self):
- """ Repin domain vcpus if a restricted cpus list is provided
+ """ Repin domain vcpus if a restricted cpus list is provided.
+ Returns the chosen node number.
"""
def has_cpus():
return True
return False
+ index = 0
if has_cpumap():
for v in range(0, self.info['VCPUs_max']):
if self.info['vcpus_params'].has_key('cpumap%i' % v):
cpumask = info['node_to_cpu'][index]
for v in range(0, self.info['VCPUs_max']):
xc.vcpu_setaffinity(self.domid, v, cpumask)
+ return index
+
+ def _freeDMAmemory(self, node):
+
+ # If we are PV and have PCI devices, the guest will
+ # turn on a SWIOTLB. The SWIOTLB _MUST_ be located in the DMA32
+ # zone (under 4GB). To make that possible we need to balloon down
+ # Dom0 until there is enough (64MB) memory under the 4GB mark. This
+ # ballooning might take more than just 64MB out of Dom0, though :-(
+ if not self.info.is_pv_and_has_pci():
+ return
+
+ retries = 2000
+ ask_for_mem = 0
+ need_mem = 0
+ try:
+ while (retries > 0):
+ physinfo = xc.physinfo()
+ free_mem = physinfo['free_memory']
+ nr_nodes = physinfo['nr_nodes']
+ node_to_dma32_mem = physinfo['node_to_dma32_mem']
+ if (node >= nr_nodes):
+ return
+ # Extra 2MB above 64MB seems to do the trick.
+ need_mem = 64 * 1024 + 2048 - node_to_dma32_mem[node]
+ # Our starting point: ask just for the difference needed
+ # to have an extra 64MB under 4GB.
+ ask_for_mem = max(need_mem, ask_for_mem)
+ if (need_mem > 0):
+ log.debug('_freeDMAmemory (%d) Need %dKiB DMA memory. '
+ 'Asking for %dKiB', retries, need_mem,
+ ask_for_mem)
+
+ balloon.free(ask_for_mem, self)
+ ask_for_mem = ask_for_mem + 2048
+ else:
+ # OK. We got enough DMA memory.
+ break
+ retries = retries - 1
+ except:
+ # This is best-effort, after all.
+ need_mem = max(1, need_mem)
+
+ if (need_mem > 0):
+ log.warn('We tried our best to balloon down DMA memory to '
+ 'accommodate your PV guest. We need %dKiB extra memory.',
+ need_mem)
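
To make the sizing arithmetic above concrete, a worked example with invented
numbers (the 64 * 1024 + 2048 target comes straight from the hunk):

    # Hypothetical: node 0 currently has 10MiB free below 4GB.
    node_to_dma32_mem = 10 * 1024                    # 10240 KiB
    need_mem = 64 * 1024 + 2048 - node_to_dma32_mem  # 57344 KiB (56 MiB)
    # Dom0 is ballooned down by at least 57344 KiB; every retry that still
    # comes up short bumps ask_for_mem by another 2048 KiB.
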
def _setSchedParams(self):
if XendNode.instance().xenschedinfo() == 'credit':
# repin domain vcpus if a restricted cpus list is provided
# this is done prior to memory allocation to aide in memory
# distribution for NUMA systems.
- self._setCPUAffinity()
+ node = self._setCPUAffinity()
# Set scheduling parameters.
self._setSchedParams()
if self.info.target():
self._setTarget(self.info.target())
+ self._freeDMAmemory(node)
+
self._createDevices()
self.image.cleanupTmpImages()
except:
str='none\n'
return str[:-1];
- def format_node_to_memory(self, pinfo):
+ def format_node_to_memory(self, pinfo, key):
str=''
whitespace=''
try:
- node_to_memory=pinfo['node_to_memory']
+ node_to_memory=pinfo[key]
for i in range(0, pinfo['nr_nodes']):
str+='%snode%d:%d\n' % (whitespace,
i,
info['total_memory'] = info['total_memory'] / 1024
info['free_memory'] = info['free_memory'] / 1024
info['node_to_cpu'] = self.format_node_to_cpu(info)
- info['node_to_memory'] = self.format_node_to_memory(info)
+ info['node_to_memory'] = self.format_node_to_memory(info,
+ 'node_to_memory')
+ info['node_to_dma32_mem'] = self.format_node_to_memory(info,
+ 'node_to_dma32_mem')
ITEM_ORDER = ['nr_cpus',
'nr_nodes',
'total_memory',
'free_memory',
'node_to_cpu',
- 'node_to_memory'
+ 'node_to_memory',
+ 'node_to_dma32_mem'
]
return [[k, info[k]] for k in ITEM_ORDER]
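
With all three pieces applied, `xm info` grows a node_to_dma32_mem entry next to
node_to_memory. Illustrative output only (field values invented):

    nr_nodes               : 2
    node_to_cpu            : node0:0-3
                             node1:4-7
    node_to_memory         : node0:2048
                             node1:2048
    node_to_dma32_mem      : node0:1999
                             node1:0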